flag_package: Option<String>,
flag_jobs: Option<uint>,
flag_features: Vec<String>,
+ flag_name: Option<String>,
flag_no_default_features: bool,
flag_target: Option<String>,
flag_manifest_path: Option<String>,
Options:
-h, --help Print this message
+ --name NAME Name of the bench to run
--no-run Compile, but don't run benchmarks
-p SPEC, --package SPEC Package to run benchmarks for
-j N, --jobs N The number of jobs to run in parallel
shell.set_verbose(options.flag_verbose);
let mut ops = ops::TestOptions {
+ name: options.flag_name.as_ref().map(|s| s.as_slice()),
no_run: options.flag_no_run,
compile_opts: ops::CompileOptions {
env: "bench",
flag_features: Vec<String>,
flag_jobs: Option<uint>,
flag_manifest_path: Option<String>,
+ flag_name: Option<String>,
flag_no_default_features: bool,
flag_no_run: bool,
flag_package: Option<String>,
Options:
-h, --help Print this message
+ --name NAME Name of the test to run
--no-run Compile, but don't run tests
-p SPEC, --package SPEC Package to run tests for
-j N, --jobs N The number of jobs to run in parallel
shell.set_verbose(options.flag_verbose);
let mut ops = ops::TestOptions {
+ name: options.flag_name.as_ref().map(|s| s.as_slice()),
no_run: options.flag_no_run,
compile_opts: ops::CompileOptions {
env: "test",
pub libraries: HashMap<PackageId, Vec<Path>>,
/// An array of all tests created during this compilation.
- pub tests: Vec<Path>,
+ pub tests: Vec<(String, Path)>,
/// An array of all binaries created.
pub binaries: Vec<Path>,
pairs.push((old_root.join(filename), root.join(filename)));
if target.get_profile().is_test() {
- cx.compilation.tests.push(dst.clone());
+ cx.compilation.tests.push((target.get_name().into_string(), dst.clone()));
} else if target.is_bin() {
cx.compilation.binaries.push(dst.clone());
} else if target.is_lib() {
/// Options controlling how `cargo test` / `cargo bench` compiles and runs targets.
pub struct TestOptions<'a> {
/// Compilation settings forwarded to the build step.
pub compile_opts: ops::CompileOptions<'a>,
/// Compile the test/bench executables but skip running them (`--no-run`).
pub no_run: bool,
/// Added by this patch: when `Some`, only the target with this exact name is
/// run (`--name NAME`); `None` runs every target. Borrows from the parsed
/// CLI options, hence the `'a` lifetime.
+ pub name: Option<&'a str>,
}
pub fn run_tests(manifest_path: &Path,
if options.no_run { return Ok(None) }
compile.tests.sort();
+ let target_name = options.name;
+ let mut tests_to_run = compile.tests.iter().filter(|&&(ref test_name, _)| {
+ target_name.map_or(true, |target_name| target_name == test_name.as_slice())
+ });
+
let cwd = os::getcwd();
- for exe in compile.tests.iter() {
+ for &(_, ref exe) in tests_to_run {
let to_display = match exe.path_relative_from(&cwd) {
Some(path) => path,
None => exe.clone(),
}
}
+ if options.name.is_some() { return Ok(None) }
+
if options.compile_opts.env == "bench" { return Ok(None) }
let mut libs = compile.package.get_targets().iter().filter_map(|target| {
RUNNING)));
})
+// Verifies `cargo bench --name NAME` benches only the named [[bin]] target:
+// bin2's benchmark must run and bin1's must not appear in the output.
+test!(bench_target_name {
+ let prj = project("foo")
+ .file("Cargo.toml" , r#"
+ [package]
+ name = "foo"
+ version = "0.0.1"
+ authors = []
+
+ [[bin]]
+ name="bin1"
+ path="src/bin1.rs"
+
+ [[bin]]
+ name="bin2"
+ path="src/bin2.rs"
+ "#)
+ .file("src/bin1.rs", r#"
+ extern crate test;
+ #[bench] fn run1(_ben: &mut test::Bencher) { }"#)
+ .file("src/bin2.rs", r#"
+ extern crate test;
+ #[bench] fn run2(_ben: &mut test::Bencher) { }"#);
+
+ // Only bin2's bench output is expected. The format key was misspelled
+ // `runnning`; fixed to `running` to match the sibling tests below.
+ let expected_stdout = format!("\
+{compiling} foo v0.0.1 ({dir})
+{running} target[..]release[..]bin2[..]
+
+running 1 test
+test run2 ... bench: 0 ns/iter (+/- 0)
+
+test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured
+
+",
+ compiling = COMPILING,
+ running = RUNNING,
+ dir = prj.url());
+
+ assert_that(prj.cargo_process("bench").arg("--name").arg("bin2"),
+ execs().with_status(0).with_stdout(expected_stdout.as_slice()));
+})
+
test!(cargo_bench_verbose {
let p = project("foo")
.file("Cargo.toml", basic_bin_manifest("foo").as_slice())
dir = p.url()).as_slice()));
})
+// Checks that `cargo test --name bin2` builds and runs only the tests in the
+// `bin2` binary target; `bin1`'s `test1` must not execute or appear in output.
+test!(test_run_specific_bin_target {
+ let prj = project("foo")
+ .file("Cargo.toml" , r#"
+ [package]
+ name = "foo"
+ version = "0.0.1"
+ authors = []
+
+ [[bin]]
+ name="bin1"
+ path="src/bin1.rs"
+
+ [[bin]]
+ name="bin2"
+ path="src/bin2.rs"
+ "#)
+ .file("src/bin1.rs", "#[test] fn test1() { }")
+ .file("src/bin2.rs", "#[test] fn test2() { }");
+
+ // Exactly one RUNNING line (bin2's test binary) and exactly one test result.
+ let expected_stdout = format!("\
+{compiling} foo v0.0.1 ({dir})
+{running} target[..]bin2-[..]
+
+running 1 test
+test test2 ... ok
+
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured
+
+",
+ compiling = COMPILING,
+ running = RUNNING,
+ dir = prj.url());
+
+ assert_that(prj.cargo_process("test").arg("--name").arg("bin2"),
+ execs().with_status(0).with_stdout(expected_stdout.as_slice()));
+})
+
+// Checks that `--name b` matches every target named `b`: both the binary
+// target src/bin/b.rs and the integration-test target tests/b.rs run (two
+// RUNNING sections expected), while the `a` targets are filtered out.
+test!(test_run_specific_test_target {
+ let prj = project("foo")
+ .file("Cargo.toml" , r#"
+ [package]
+ name = "foo"
+ version = "0.0.1"
+ authors = []
+ "#)
+ .file("src/bin/a.rs", "fn main() { }")
+ .file("src/bin/b.rs", "#[test] fn test_b() { } fn main() { }")
+ .file("tests/a.rs", "#[test] fn test_a() { }")
+ .file("tests/b.rs", "#[test] fn test_b() { }");
+
+ // Two runs of a `b-…` executable, each reporting the single `test_b` test.
+ let expected_stdout = format!("\
+{compiling} foo v0.0.1 ({dir})
+{running} target[..]b-[..]
+
+running 1 test
+test test_b ... ok
+
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured
+
+{running} target[..]b-[..]
+
+running 1 test
+test test_b ... ok
+
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured
+
+",
+ compiling = COMPILING,
+ running = RUNNING,
+ dir = prj.url());
+
+ assert_that(prj.cargo_process("test").arg("--name").arg("b"),
+ execs().with_status(0).with_stdout(expected_stdout.as_slice()));
+})
+
test!(test_no_harness {
let p = project("foo")
.file("Cargo.toml", r#"